#
CONFIG_XEN_PRIVILEGED_GUEST=y
CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_XEN_BLKDEV_BACKEND is not set
# CONFIG_XEN_BLKDEV_TAP_BE is not set
-CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
for (va = gdt_descr->address, f = 0;
va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
- frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ frames[f] = __vms_virt_to_machine(va) >> PAGE_SHIFT;
make_page_readonly((void *)va);
}
flush_page_update_queue();
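/*
 * Note: the frame list built above is what gets registered with the
 * hypervisor; the GDT pages must be read-only before the hypervisor
 * will accept them. A minimal sketch of the call that normally follows
 * this loop, assuming the XenoLinux HYPERVISOR_set_gdt() wrapper (not
 * part of this hunk):
 *
 *	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
 *		BUG();
 */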
cpumask_t mask;
preempt_disable();
#endif
+#if 0
make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
+#endif
load_LDT(pc);
flush_page_update_queue();
#ifdef CONFIG_SMP
#endif
}
if (oldsize) {
+#if 0
make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
PAGE_SIZE);
+#endif
flush_page_update_queue();
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
if (err < 0)
return err;
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+#if 0
make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
+#endif
flush_page_update_queue();
return 0;
}
if (mm->context.size) {
if (mm == current->active_mm)
clear_LDT();
+#if 0
make_pages_writable(mm->context.ldt,
(mm->context.size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
+#endif
flush_page_update_queue();
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
}
lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
- mach_lp = arbitrary_virt_to_machine(lp);
+ mach_lp = arbitrary_virt_to_phys(lp);
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
static void
xen_contig_memory(unsigned long vstart, unsigned int order)
{
+#define HACK
+#ifndef HACK
/*
* Ensure multi-page extents are contiguous in machine memory.
* This code could be cleaned up some, and the number of
xen_tlb_flush();
balloon_unlock(flags);
+#endif
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
next->tls_array[i].b != prev->tls_array[i].b)) \
queue_multicall3(__HYPERVISOR_update_descriptor, \
- virt_to_machine(&get_cpu_gdt_table(cpu) \
+ virt_to_phys(&get_cpu_gdt_table(cpu) \
[GDT_ENTRY_TLS_MIN + i]), \
((u32 *)&next->tls_array[i])[0], \
((u32 *)&next->tls_array[i])[1]); \
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
-unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-EXPORT_SYMBOL(phys_to_machine_mapping);
+unsigned int *__vms_phys_to_machine_mapping, *__vms_pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(__vms_phys_to_machine_mapping);
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);
}
#endif
- phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+ __vms_phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
return max_low_pfn;
}
/* Make sure we have a large enough P->M table. */
if (max_pfn > xen_start_info.nr_pages) {
- phys_to_machine_mapping = alloc_bootmem_low_pages(
+ __vms_phys_to_machine_mapping = alloc_bootmem_low_pages(
max_pfn * sizeof(unsigned long));
- memset(phys_to_machine_mapping, ~0,
+ memset(__vms_phys_to_machine_mapping, ~0,
max_pfn * sizeof(unsigned long));
- memcpy(phys_to_machine_mapping,
+ memcpy(__vms_phys_to_machine_mapping,
(unsigned long *)xen_start_info.mfn_list,
xen_start_info.nr_pages * sizeof(unsigned long));
free_bootmem(
sizeof(unsigned long))));
}
- pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ __vms_pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
{
- pfn_to_mfn_frame_list[j] =
- virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ __vms_pfn_to_mfn_frame_list[j] =
+ __vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
}
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
- virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+ __vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
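/*
 * Arithmetic behind the loop above (assuming i386, 4 KB pages, 4-byte
 * entries): each page of __vms_phys_to_machine_mapping holds
 * PAGE_SIZE/sizeof(unsigned long) = 1024 entries, i.e. covers 4 MB of
 * pseudo-physical memory, so one frame-list page of 1024 entries
 * describes up to 4 GB. The shared-info field set here records only
 * the machine frame of that single frame-list page.
 */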
/*
printk("%08lx\n", regs->eip);
page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
[address >> 22];
- printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
- machine_to_phys(page));
+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n",
+ __vms_phys_to_machine(page), page);
/*
* We must not directly access the pte in the highpte
* case, the page table might be allocated in highmem.
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
- page = machine_to_phys(page);
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
- printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
- machine_to_phys(page));
+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n",
+ __vms_phys_to_machine(page), page);
}
#endif
show_trace(NULL, (unsigned long *)&regs[1]);
#ifdef CONFIG_SMP
#define QUEUE_SIZE 1
#else
-#define QUEUE_SIZE 128
+#define QUEUE_SIZE 1
#endif
#endif
void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
- _flush_page_update_queue();
- *(unsigned long *)ptr = val;
+ set_pte(ptr, __pte(val));
}
void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
{
- _flush_page_update_queue();
- *(unsigned long *)ptr = val;
+ set_pmd(ptr, __pmd(val));
}
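/*
 * With this change the "queued" update functions no longer batch
 * anything: they degenerate into direct writes through set_pte() and
 * set_pmd(), which is consistent with forcing QUEUE_SIZE to 1 above.
 * Callers keep the same API; for example,
 *
 *	queue_l1_entry_update(pte, 0);
 *
 * is now equivalent to set_pte(pte, __pte(0)).
 */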
void queue_pt_switch(unsigned long ptr)
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
-void queue_pgd_pin(unsigned long ptr)
+void __vms_queue_pgd_pin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
-void queue_pgd_unpin(unsigned long ptr)
+void __vms_queue_pgd_unpin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
-void queue_pte_pin(unsigned long ptr)
+void __vms_queue_pte_pin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
-void queue_pte_unpin(unsigned long ptr)
+void __vms_queue_pte_unpin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index();
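/*
 * All of the pin/unpin helpers above share one encoding (this assumes
 * the Xen 2.x mmu_update_t ABI): the machine address of the page-table
 * page goes in .ptr with its low bits overloaded as
 * MMU_EXTENDED_COMMAND, and the sub-command goes in .val. Sketch:
 *
 *	mmu_update_t u;
 *	u.ptr = __vms_phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
 *	u.val = MMUEXT_PIN_L2_TABLE;   or MMUEXT_UNPIN_TABLE, etc.
 */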
/* queue and flush versions of the above */
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
- *(unsigned long *)ptr = val;
+ set_pte(ptr, __pte(val));
}
void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
{
- *(unsigned long *)ptr = val;
+ set_pmd(ptr, __pmd(val));
}
void xen_pt_switch(unsigned long ptr)
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
-void xen_pgd_pin(unsigned long ptr)
+void __vms_xen_pgd_pin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
-void xen_pgd_unpin(unsigned long ptr)
+void __vms_xen_pgd_unpin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
-void xen_pte_pin(unsigned long ptr)
+void __vms_xen_pte_pin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
-void xen_pte_unpin(unsigned long ptr)
+void __vms_xen_pte_unpin(unsigned long ptr)
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index_and_flush();
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
queue_l1_entry_update(pte, 0);
- phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ __vms_phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
}
/* Flush updates through and flush the TLB. */
* it. We clean up by write-enabling and then freeing the old page dir.
*/
memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
- //make_page_readonly(new_pgd);
- queue_pgd_pin(__pa(new_pgd));
load_cr3(new_pgd);
- queue_pgd_unpin(__pa(old_pgd));
__flush_tlb_all(); /* implicit flush */
- make_page_writable(old_pgd);
+ //make_page_writable(old_pgd);
flush_page_update_queue();
free_bootmem(__pa(old_pgd), PAGE_SIZE);
/* Switch to the real shared_info page, and clear the dummy page. */
flush_page_update_queue();
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+	printk("xen_start_info.shared_info=%lx\n", xen_start_info.shared_info);
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
memset(empty_zero_page, 0, sizeof(empty_zero_page));
/* Setup mapping of lower 1st MB */
for (i = 0; i < NR_FIX_ISAMAPS; i++)
if (xen_start_info.flags & SIF_PRIVILEGED)
- set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ __vms_set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
else
- set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
- virt_to_machine(empty_zero_page));
+ __vms_set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+ __vms_virt_to_machine(empty_zero_page));
#endif
}
static inline int is_local_lowmem(unsigned long address)
{
extern unsigned long max_low_pfn;
- unsigned long mfn = address >> PAGE_SHIFT;
- unsigned long pfn = mfn_to_pfn(mfn);
- return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+ unsigned long pfn = address >> PAGE_SHIFT;
+ return (pfn < max_low_pfn);
}
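/*
 * The original test round-tripped through the m2p/p2m tables to prove
 * the frame was locally owned; this port treats the address as an
 * ordinary pseudo-physical one, so a plain pfn range check suffices.
 */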
/*
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
+#if 0
if (is_local_lowmem(phys_addr)) {
char *t_addr, *t_end;
struct page *page;
domid = DOMID_LOCAL;
}
+#endif
/*
* Mappings have to be page-aligned
*/
idx = FIX_BTMAP_BEGIN;
while (nrpages > 0) {
- set_fixmap_ma(idx, phys_addr);
+ __vms_set_fixmap_ma(idx, phys_addr);
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
BUG();
do {
- (*v)->ptr = virt_to_machine(pte);
+ (*v)->ptr = __vms_virt_to_machine(pte);
(*v)++;
address += PAGE_SIZE;
pte++;
mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
v = w = &u[0];
- if (domid != DOMID_LOCAL) {
+ if (0 && domid != DOMID_LOCAL) {
u[0].ptr = MMU_EXTENDED_COMMAND;
u[0].val = MMUEXT_SET_FOREIGNDOM;
u[0].val |= (unsigned long)domid << 16;
* Fill in the machine address: PTE ptr is done later by
* __direct_remap_area_pages().
*/
- v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
+ {
+ mmu_update_t update;
+ int success = 0;
+ unsigned long ppfn;
+
+ update.ptr = (machine_addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+ update.val = -1;
+ ppfn = HYPERVISOR_mmu_update(&update, 1, &success);
+		if (!success)
+ BUG();
+
+ v->val = (ppfn << PAGE_SHIFT) | pgprot_val(prot);
+ }
machine_addr += PAGE_SIZE;
address += PAGE_SIZE;
v++;
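/*
 * The block above queries the hypervisor for the pseudo-physical frame
 * backing machine_addr: a MMU_MACHPHYS_UPDATE request with val == -1,
 * whose return value this port's hypervisor uses to hand back the pfn.
 * The same pattern recurs in __vms_set_pte_pfn_ma() below; a helper
 * capturing it might look like this (hypothetical, not in the patch):
 *
 *	static unsigned long __vms_machine_to_pfn_query(unsigned long ma)
 *	{
 *		mmu_update_t update;
 *		int success = 0;
 *		unsigned long ppfn;
 *
 *		update.ptr = (ma & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
 *		update.val = -1;
 *		ppfn = HYPERVISOR_mmu_update(&update, 1, &success);
 *		if (!success)
 *			BUG();
 *		return ppfn;
 *	}
 */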
if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
pte_t old = *kpte;
pte_t standard = mk_pte(page, PAGE_KERNEL);
- set_pte_batched(kpte, mk_pte(page, prot));
+ set_pte_atomic(kpte, mk_pte(page, prot));
if (pte_same(old,standard))
get_page(kpte_page);
} else {
set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
}
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
+ set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
__put_page(kpte_page);
}
if (err)
break;
}
- flush_page_update_queue();
spin_unlock_irqrestore(&cpa_lock, flags);
return err;
}
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
-static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+static void __vms_set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
pgprot_t flags)
{
pgd_t *pgd;
}
pte = pte_offset_kernel(pmd, vaddr);
/* <pfn,flags> stored as-is, to permit clearing entries */
- set_pte(pte, pfn_pte_ma(pfn, flags));
+ {
+ mmu_update_t update;
+ int success = 0;
+ unsigned long ppfn;
+
+ update.ptr = (pfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ update.val = -1;
+ ppfn = HYPERVISOR_mmu_update(&update, 1, &success);
+		if (!success)
+ BUG();
+ set_pte(pte, pfn_pte(ppfn, flags));
+ }
/*
* It's enough to flush this one mapping.
set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
-void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void __vms___set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
unsigned long address = __fix_to_virt(idx);
BUG();
return;
}
- set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+ __vms_set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
- //make_page_readonly(pte);
- xen_flush_page_update_queue();
}
return pte;
}
set_page_count(page, 1);
clear_page(pte);
- //make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- flush_page_update_queue();
}
void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
struct page *page = virt_to_page(pte);
ClearPageForeign(page);
- queue_pte_unpin(__pa(pte));
- make_page_writable(pte);
- flush_page_update_queue();
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
if (pte < highmem_start_page)
#endif
kmem_cache_free(pte_cache,
- phys_to_virt(page_to_pseudophys(pte)));
+ phys_to_virt(__vms_page_to_pseudophys(pte)));
#ifdef CONFIG_HIGHPTE
else
__free_page(pte);
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
out:
- //make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- flush_page_update_queue();
+ ;
}
/* never called when PTRS_PER_PMD > 1 */
{
unsigned long flags; /* can be called from interrupt context */
- queue_pgd_unpin(__pa(pgd));
- make_page_writable(pgd);
- flush_page_update_queue();
-
if (PTRS_PER_PMD > 1)
return;
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+#if 0
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
- phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+ phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
#ifdef CONFIG_HIGHMEM
if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
make_lowmem_page_readonly(phys_to_virt(phys));
}
+#endif
}
void make_page_writable(void *va)
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
- phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+ phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
#ifdef CONFIG_HIGHMEM
if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
extern void time_suspend(void);
extern void time_resume(void);
extern unsigned long max_pfn;
- extern unsigned int *pfn_to_mfn_frame_list;
+ extern unsigned int *__vms_pfn_to_mfn_frame_list;
suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
if ( suspend_record == NULL )
memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
- HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+ HYPERVISOR_suspend(__vms_virt_to_machine(suspend_record) >> PAGE_SHIFT);
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_4gb_segments);
memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-#else
set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
-#endif
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
{
- pfn_to_mfn_frame_list[j] =
- virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ __vms_pfn_to_mfn_frame_list[j] =
+ __vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
}
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
- virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+ __vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
irq_resume();
BUG();
pfn = page - mem_map;
- if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
+ if ( __vms_phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
BUG();
/* Update P->M and M->P tables. */
- phys_to_machine_mapping[pfn] = mfn_list[i];
+ __vms_phys_to_machine_mapping[pfn] = mfn_list[i];
queue_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if it's not a highmem page. */
}
pfn = page - mem_map;
- mfn_list[i] = phys_to_machine_mapping[pfn];
- phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+ mfn_list[i] = __vms_phys_to_machine_mapping[pfn];
+ __vms_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
if ( !PageHighMem(page) )
{
#else
mcl[i].args[3] = blkif->domid;
#endif
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ __vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
}
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+ xreq->frame_and_sects[i] = __vms_machine_to_phys(req->frame_and_sects[i]);
}
static inline void translate_req_to_mfn(blkif_request_t *xreq,
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+ xreq->frame_and_sects[i] = __vms_phys_to_machine(req->frame_and_sects[i]);
}
blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
msg->handle = 0;
- msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
+ msg->shmem_frame = (__vms_virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
for ( i = 0; i < req->nr_segments; i++ )
{
unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
- unsigned long mfn = phys_to_machine_mapping[pfn];
+ unsigned long mfn = __vms_phys_to_machine_mapping[pfn];
xen_machphys_update(mfn, pfn);
}
break;
memset(&req, 0, sizeof(req));
req.operation = BLKIF_OP_PROBE;
req.nr_segments = 1;
- req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+ req.frame_and_sects[0] = __vms_virt_to_machine(buf) | 7;
blkif_control_send(&req, &rsp);
{
netif = netdev_priv(skb->dev);
vdata = (unsigned long)skb->data;
- mdata = virt_to_machine(vdata);
+ mdata = __vms_virt_to_machine(vdata);
/* Memory squeeze? Back off for an arbitrary while. */
if ( (new_mfn = alloc_mfn()) == 0 )
* Set the new P2M table entry before reassigning the old data page.
* Heed the comment in pgtable-2level.h:pte_page(). :-)
*/
- phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+ __vms_phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
continue;
}
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+ __vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
- rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+ rx_pfn_array[i] = __vms_virt_to_machine(skb->head) >> PAGE_SHIFT;
/* Remove this page from pseudo phys map before passing back to Xen. */
- phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+ __vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
= INVALID_P2M_ENTRY;
rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
tx->id = id;
- tx->addr = virt_to_machine(skb->data);
+ tx->addr = __vms_virt_to_machine(skb->data);
tx->size = skb->len;
wmb(); /* Ensure that backend will see the request. */
mcl->args[2] = 0;
mcl++;
- phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+ __vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
rx->addr >> PAGE_SHIFT;
__skb_queue_tail(&rxq, skb);
tx = &np->tx->ring[requeue_idx++].req;
tx->id = i;
- tx->addr = virt_to_machine(skb->data);
+ tx->addr = __vms_virt_to_machine(skb->data);
tx->size = skb->len;
np->stats.tx_bytes += skb->len;
netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
msg->handle = np->handle;
- msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
- msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+ msg->tx_shmem_frame = (__vms_virt_to_machine(np->tx) >> PAGE_SHIFT);
+ msg->rx_shmem_frame = (__vms_virt_to_machine(np->rx) >> PAGE_SHIFT);
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
{
- unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+ unsigned long m2pv = (unsigned long)__vms_machine_to_phys_mapping;
pgd_t *pgd = pgd_offset_k(m2pv);
pmd_t *pmd = pmd_offset(pgd, m2pv);
unsigned long m2p_start_mfn = pmd_val(*pmd) >> PAGE_SHIFT;
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+#define C(i) HYPERVISOR_update_descriptor(__pa(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
C(0); C(1); C(2);
#undef C
}
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
-extern void __set_fixmap_ma (enum fixed_addresses idx,
+extern void __vms___set_fixmap_ma (enum fixed_addresses idx,
unsigned long mach, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma_ro(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+#define __vms_set_fixmap_ma(idx, phys) \
+ __vms___set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define __vms_set_fixmap_ma_ro(idx, phys) \
+ __vms___set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
/*
* Some hardware wants to get fixmapped without caching.
*/
/*
* Change "struct page" to physical address.
*/
-#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+#define __vms_page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define __vms_page_to_machphys(page) (__vms_phys_to_machine(__vms_page_to_pseudophys(page)))
+#define page_to_phys(page) (__vms_page_to_machphys(page))
-#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+#define __vms_bio_to_pseudophys(bio) (__vms_page_to_pseudophys(bio_page((bio))) + \
(unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+#define __vms_bvec_to_pseudophys(bv) (__vms_page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+#define __vms_bvec_to_machphys(bv) (__vms_page_to_machphys((bv)->bv_page) + \
(unsigned long) (bv)->bv_offset)
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
- ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
- bvec_to_pseudophys((vec2))))
+ ((__vms_bvec_to_machphys((vec1)) + (vec1)->bv_len) == \
+ __vms_bvec_to_machphys((vec2))))
extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
*
* Allow them on x86 for legacy drivers, though.
*/
-#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-#define bus_to_virt(_x) __va(machine_to_phys(_x))
+#define virt_to_bus(_x) __vms_phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) ({ BUG(); __va((_x)); })
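/*
 * bus_to_virt() needs a machine-to-physical lookup, which this port
 * deliberately forgoes, so legacy callers now trap via BUG(). The
 * forward direction is still cheap:
 *
 *	dma_addr_t bus = virt_to_bus(buf);	virt -> machine address
 */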
/*
* readX/writeX() are used to access memory mapped devices. On some
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#ifndef BUG
+#include <asm/bug.h>
+#endif
+
#include <linux/config.h>
#include <linux/string.h>
#include <linux/types.h>
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned int *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
-#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
-static inline unsigned long phys_to_machine(unsigned long phys)
+extern unsigned int *__vms_phys_to_machine_mapping;
+#define __vms_pfn_to_mfn(_pfn) ((unsigned long)(__vms_phys_to_machine_mapping[(_pfn)]))
+#define __vms_mfn_to_pfn(_mfn) ({ BUG(); ((unsigned long)(__vms_machine_to_phys_mapping[(_mfn)])); })
+static inline unsigned long __vms_phys_to_machine(unsigned long phys)
{
- unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ unsigned long machine = __vms_pfn_to_mfn(phys >> PAGE_SHIFT);
machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
return machine;
}
-static inline unsigned long machine_to_phys(unsigned long machine)
+static inline unsigned long __vms_machine_to_phys(unsigned long machine)
{
- unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ unsigned long phys = __vms_mfn_to_pfn(machine >> PAGE_SHIFT);
phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
return phys;
}
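/*
 * Usage sketch for the renamed translation helpers (i386, 4 KB pages):
 * sub-page offsets are preserved across the pfn -> mfn lookup.
 *
 *	unsigned long ma = __vms_phys_to_machine(__pa(ptr));
 *
 * The reverse, __vms_machine_to_phys(), now BUG()s through
 * __vms_mfn_to_pfn(), since this port avoids the m2p table entirely.
 */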
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
-#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
- (x).pte_low)
-#define pte_val_ma(x) ((x).pte_low)
+#define pte_val(x) ((x).pte_low)
+#define __vms_pte_val_ma(x) ((x).pte_low)
#define HPAGE_SHIFT 22
#endif
#define PTE_MASK PAGE_MASK
static inline unsigned long pmd_val(pmd_t x)
{
- unsigned long ret = x.pmd;
- if (ret) ret = machine_to_phys(ret);
- return ret;
+ return x.pmd;
}
#define pgd_val(x) ({ BUG(); (unsigned long)0; })
#define pgprot_val(x) ((x).pgprot)
static inline pte_t __pte(unsigned long x)
{
- if (x & 1) x = phys_to_machine(x);
return ((pte_t) { (x) });
}
-#define __pte_ma(x) ((pte_t) { (x) } )
static inline pmd_t __pmd(unsigned long x)
{
- if ((x & 1)) x = phys_to_machine(x);
return ((pmd_t) { (x) });
}
#define __pgd(x) ({ BUG(); (pgd_t) { 0 }; })
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+#define __vms_virt_to_machine(_a) (__vms_phys_to_machine(__pa(_a)))
+#define __vms_machine_to_virt(_m) (__va(__vms_machine_to_phys(_m)))
#endif /* __KERNEL__ */
set_pmd(pmd, __pmd(_PAGE_TABLE +
((unsigned long long)page_to_pfn(pte) <<
(unsigned long long) PAGE_SHIFT)));
- flush_page_update_queue();
}
/*
* Allocate and free page tables.
static inline void pte_free_kernel(pte_t *pte)
{
free_page((unsigned long)pte);
- make_page_writable(pte);
- flush_page_update_queue();
}
extern void pte_free(struct page *pte);
* (pmds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) ((void)0)
#define pgd_page(pgd) \
{
pte_t pte = *xp;
if (pte.pte_low)
- set_pte(xp, __pte_ma(0));
+ set_pte(xp, __pte(0));
return pte;
}
*/
#define INVALID_P2M_ENTRY (~0U)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_pfn(_pte) \
-({ \
- unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
- unsigned long pfn = mfn_to_pfn(mfn); \
- if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
- pfn = max_mapnr; /* special: force !pfn_valid() */ \
- pfn; \
-})
+#define pte_pfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
#define pte_none(x) (!(x).pte_low)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
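/*
 * FOREIGN_FRAME() above tags a p2m entry by setting the top bit of the
 * mfn, marking a frame temporarily mapped from another domain so it is
 * never treated as locally owned memory. E.g. in the block/net
 * backends earlier in this patch:
 *
 *	__vms_phys_to_machine_mapping[pfn] = FOREIGN_FRAME(mfn);
 */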
/*
#define pmd_clear(xp) do { \
set_pmd(xp, __pmd(0)); \
- xen_flush_page_update_queue(); \
} while (0)
#ifndef CONFIG_DISCONTIGMEM
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
-#define arbitrary_virt_to_machine(__va) \
+#define __vms_arbitrary_virt_to_machine(__va) \
({ \
pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va)); \
pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
- __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+ __vms_phys_to_machine(__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+})
+
+#define arbitrary_virt_to_phys(__va) \
+({ \
+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
+ pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va)); \
+ pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
+ unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
+ (__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
})
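/*
 * Two deliberately different lookups: __vms_arbitrary_virt_to_machine()
 * walks the kernel page tables and converts the resulting frame to a
 * machine address, while arbitrary_virt_to_phys() stops at the
 * pseudo-physical address. The LDT descriptor-update path above, for
 * instance, now wants the latter:
 *
 *	mach_lp = arbitrary_virt_to_phys(lp);
 */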
#endif /* !__ASSEMBLY__ */
* machine->physical mapping table starts at this address, read-only.
*/
#define HYPERVISOR_VIRT_START (0xFC000000UL)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
+#ifndef __vms_machine_to_phys_mapping
+#define __vms_machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
#ifndef __ASSEMBLY__